diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -766,50 +766,6 @@ !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv")); } -multiclass RVVStridedSegStore { - foreach type = TypeList in { - defvar eew = !cond(!eq(type, "c") : "8", - !eq(type, "s") : "16", - !eq(type, "i") : "32", - !eq(type, "l") : "64", - !eq(type, "x") : "16", - !eq(type, "f") : "32", - !eq(type, "d") : "64"); - foreach nf = NFList in { - let Name = op # nf # "e" # eew # "_v", - IRName = op # nf, - MaskedIRName = op # nf # "_mask", - NF = nf, - HasMaskedOffOperand = false, - MaskedPolicyScheme = NonePolicy, - ManualCodegen = [{ - { - if (IsMasked) { - // Builtin: (mask, ptr, stride, val0, val1, ..., vl). - // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl) - std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1); - std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3); - assert(Ops.size() == NF + 4); - } else { - // Builtin: (ptr, stride, val0, val1, ..., vl). - // Intrinsic: (val0, val1, ..., ptr, stride, vl) - std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1); - assert(Ops.size() == NF + 3); - } - IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()}; - } - }] in { - defvar V = VString.S; - defvar UV = VString.S; - def : RVVBuiltin<"v", "0Pet" # V, type>; - if !not(IsFloat.val) then { - def : RVVBuiltin<"Uv", "0PUet" # UV, type>; - } - } - } - } -} - multiclass RVVIndexedSegStore { foreach type = TypeList in { foreach eew_info = EEWList in { @@ -1510,8 +1466,7 @@ !eq(type, "f") : "32", !eq(type, "d") : "64"); foreach nf = NFList in { - let Name = op # nf # "e" # eew # "_v_tuple", - OverloadedName = op # nf # "e" # eew # "_tuple", + let Name = op # nf # "e" # eew # "_v", IRName = op # nf, MaskedIRName = op # nf # "_mask", NF = nf, @@ -1687,7 +1642,6 @@ let UnMaskedPolicyScheme = NonePolicy, MaskedPolicyScheme = NonePolicy in { -defm : RVVStridedSegStore<"vssseg">; defm : RVVIndexedSegStore<"vsuxseg">; defm : RVVIndexedSegStore<"vsoxseg">; } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c @@ -7,303 +7,423 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[TMP2]], 
[[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16mf4(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf4x2(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16mf4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16mf2(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf2x2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16mf2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16m1(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m1x2(_Float16 *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16m1x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2x2 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16m2(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m2x2(_Float16 *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16m2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16m4(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m4x2(_Float16 *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16m4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16mf4(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf4x2(int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16mf4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16mf2(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf2x2(int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16mf2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16m1(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m1x2(int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16m1x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16m2(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m2x2(int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16m2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16m4(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m4x2(int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16m4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf4(uint16_t *base, 
ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16mf4(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf4x2(uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16mf4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16mf2(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf2x2(uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16mf2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16m1(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m1x2(uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16m1x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16m2(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m2x2(uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16m2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16m4(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m4x2(uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16m4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, 
_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16mf4_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16mf4x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16mf2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16mf2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16m1_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16m1x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16m2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16m2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_f16m4_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_f16m4x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16mf4_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16mf4x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16mf2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16mf2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16m1_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m1x2_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16m1x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16m2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m2x2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16m2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_i16m4_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m4x2_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_i16m4x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16mf4_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16mf4x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16mf2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16mf2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16m1_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16m1x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16m2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16m2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vssseg2e16_v_u16m4_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16_v_u16m4x2_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c @@ -7,243 +7,339 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32_v_f32mf2(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32mf2x2(float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_f32mf2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vssseg2e32_v_f32m1(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m1x2(float *base, ptrdiff_t bstride, vfloat32m1x2_t 
v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_f32m1x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vssseg2e32_v_f32m2(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m2x2(float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_f32m2x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vssseg2e32_v_f32m4(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m4x2(float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_f32m4x2(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32mf2(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32mf2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m1(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m2(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m4(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32mf2(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32mf2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32m1(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32m2(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
++// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32m4(base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_f32mf2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_f32mf2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_f32m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_f32m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_f32m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_f32m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_f32m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_f32m4x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32mf2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32mf2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e32_v_i32m4x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32mf2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32mf2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vssseg2e32_v_u32m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32_v_u32m4x2_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c
deleted file mode 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c
+++ /dev/null
@@ -1,345 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN: FileCheck --check-prefix=CHECK-RV64 %s
-
-#include
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32mf2x2(float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32mf2x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32m1x2(float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32m1x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32m2x2(float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32m2x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32m4x2(float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32m4x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32mf2x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32m1x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32m2x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32m4x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32mf2x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32m1x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32m2x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32m4x2(base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32mf2x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32m1x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32m2x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_f32m4x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32mf2x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32m1x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32m2x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_i32m4x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32mf2x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32m1x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32m2x2_m(mask, base, bstride, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_tuple_u32m4x2_m(mask, base, bstride, v_tuple, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c
@@ -7,183 +7,255 @@
 #include

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_f64m1(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_f64m1x2(double *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_f64m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_f64m2(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_f64m2x2(double *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_f64m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_f64m4(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_f64m4x2(double *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_f64m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_i64m1(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_i64m1x2(int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_i64m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_i64m2(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_i64m2x2(int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_i64m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_i64m4(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_i64m4x2(int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_i64m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_u64m1(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_u64m1x2(uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_u64m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_u64m2(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_u64m2x2(uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_u64m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_u64m4(base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_u64m4x2(uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_u64m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_f64m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_f64m1x2_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_f64m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_f64m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_f64m2x2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e64_v_f64m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_f64m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_f64m4x2_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e64_v_f64m4x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_i64m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_i64m1x2_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e64_v_i64m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_i64m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_i64m2x2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e64_v_i64m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vssseg2e64_v_i64m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e64_v_i64m4x2_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e64_v_i64m4x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void
@test_vssseg2e64_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vssseg2e64_v_u64m1_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64_v_u64m1x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vssseg2e64_v_u64m2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64_v_u64m2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c
@@ -1,248 +1,345 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8mf8(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf8x2(int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8mf8x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8mf4(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf4x2(int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8mf4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8mf2(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf2x2(int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8mf2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8m1(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m1x2(int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8m2(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m2x2(int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8m4(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m4x2(int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8mf8(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf8x2(uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8mf8x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8mf4(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf4x2(uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8mf4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8mf2(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf2x2(uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8mf2x2(base, bstride, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8m1(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m1x2(uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8m1x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8m2(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m2x2(uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8m2x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8m4(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m4x2(uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8m4x2(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8mf8_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8mf8x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8mf4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e8_v_i8mf4x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8mf2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8mf2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m1x2_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m2x2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_i8m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_i8m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m4x2_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e8_v_i8m4x2_m(mask, base, bstride, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8mf8_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8mf8x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8mf4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8mf4x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8mf2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8mf2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8m1_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8m1x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8m2_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8m2x2_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8_v_u8m4_m(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8_v_u8m4x2_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c
@@ -7,243 +7,387 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_f16mf4(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16mf4x3(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_f16mf4x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_f16mf2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16mf2x3(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_f16mf2x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_f16m1(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16m1x3(_Float16 *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_f16m1x3(base, bstride, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_f16m2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16m2x3(_Float16 *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_f16m2x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_i16mf4(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16mf4x3(int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_i16mf4x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_i16mf2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16mf2x3(int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_i16mf2x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_i16m1(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16m1x3(int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_i16m1x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_i16m2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16m2x3(int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_i16m2x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_u16mf4(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16mf4x3(uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_u16mf4x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_u16mf2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16mf2x3(uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_u16mf2x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_u16m1(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m1x3(uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_u16m1x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_u16m2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m2x3(uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_u16m2x3(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+//
CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vssseg3e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_f16mf4x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vssseg3e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_f16mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vssseg3e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_f16m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vssseg3e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_f16m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vssseg3e16_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vssseg3e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_i16mf4x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vssseg3e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_i16mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg3e16_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vssseg3e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_i16m1x3_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_i16m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vssseg3e16_v_i16m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_i16m2x3_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_i16m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4x3_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vssseg3e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_u16mf4x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vssseg3e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e16_v_u16mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1x3_m +// CHECK-RV64-SAME: 
(<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_u16m1x3_m(mask, base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], <vscale x 8 x i16> [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16_v_u16m2_m(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16_v_u16m2x3_m(mask, base, bstride, v_tuple, vl);
 }
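All of the vssseg3e16.c hunks above follow one shape: the three discrete vector operands of each builtin become a single vector-tuple operand, and the new CHECK lines verify the IR-level coercion (insertvalue into an aggregate, extractvalue back out) that Clang emits around the unchanged llvm.riscv.vssseg3 intrinsic. A minimal caller-side sketch of the converted API, assuming the tuple-returning strided segment load __riscv_vlsseg3e16_v_i16m1x3 from the companion load-side change is available:

  #include <riscv_vector.h>
  #include <stddef.h>

  /* Round-trip three interleaved int16 fields: load one segment tuple with
     a given byte stride, then store it back through the converted builtin.
     All three segment registers travel in the single vint16m1x3_t value. */
  void copy_seg3(int16_t *dst, const int16_t *src, ptrdiff_t bstride, size_t vl) {
    vint16m1x3_t v_tuple = __riscv_vlsseg3e16_v_i16m1x3(src, bstride, vl);
    __riscv_vssseg3e16_v_i16m1x3(dst, bstride, v_tuple, vl);
  }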
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c
@@ -7,183 +7,291 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv1f32.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], <vscale x 1 x float> [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e32_v_f32mf2(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_f32mf2x3(float *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32_v_f32mf2x3(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv2f32.i64(<vscale x 2 x float> [[V0]], <vscale x 2 x float> [[V1]], <vscale x 2 x float> [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv2f32.i64(<vscale x 2 x float> [[TMP3]], <vscale x 2 x float> [[TMP4]], <vscale x 2 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vssseg3e32_v_f32m1(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_f32m1x3(float *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32_v_f32m1x3(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64
noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_f32m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32m2x3(float *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_f32m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_i32mf2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32mf2x3(int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_i32mf2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vssseg3e32_v_i32m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32m1x3(int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_i32m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_i32m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32m2x3(int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_i32m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_u32mf2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32mf2x3(uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_u32mf2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vssseg3e32_v_u32m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32m1x3(uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_u32m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 
2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_u32m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32m2x3(uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_u32m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32mf2x3_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_f32mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vssseg3e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32m1x3_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_f32m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_f32m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32m2x3_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_f32m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } 
[[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_i32mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vssseg3e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32m1x3_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_i32m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_i32m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32m2x3_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_i32m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32_v_u32mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { 
<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vssseg3e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32_v_u32m1x3_m(mask, base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0]], <vscale x 4 x i32> [[V1]], <vscale x 4 x i32> [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vssseg3e32_v_u32m2_m(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32_v_u32m2x3_m(mask, base, bstride, v_tuple, vl);
 }
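The masked hunks above close out vssseg3e32.c; the _m variants keep the vbool mask as the leading argument and only collapse the value operands into the tuple. A hedged usage sketch follows (the predicate construction via __riscv_vmsne_vx_u32m1_b32 and the tuple access via __riscv_vget_v_u32m1x3_u32m1 are standard RVV intrinsics used here for illustration, not part of this patch):

  #include <riscv_vector.h>
  #include <stddef.h>

  /* Store a three-field u32 segment tuple only through lanes whose first
     field is nonzero; masked-off segments in memory are left untouched. */
  void store_nonzero_seg3(uint32_t *base, ptrdiff_t bstride,
                          vuint32m1x3_t v_tuple, size_t vl) {
    vuint32m1_t field0 = __riscv_vget_v_u32m1x3_u32m1(v_tuple, 0);
    vbool32_t mask = __riscv_vmsne_vx_u32m1_b32(field0, 0, vl);
    __riscv_vssseg3e32_v_u32m1x3_m(mask, base, bstride, v_tuple, vl);
  }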
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vssseg3e64_v_f64m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m1x3(double *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_f64m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vssseg3e64_v_f64m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m2x3(double *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_f64m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64_v_i64m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_i64m1x3(int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_i64m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64_v_i64m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_i64m2x3(int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_i64m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
void // -void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64_v_u64m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m1x3(uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_u64m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64_v_u64m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m2x3(uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_u64m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vssseg3e64_v_f64m1_m(mask, base, 
bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m1x3_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_f64m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vssseg3e64_v_f64m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m2x3_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_f64m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, vl); +void 
test_vssseg3e64_v_i64m1x3_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_i64m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64_v_i64m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_i64m2x3_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_i64m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m1x3_m(vbool64_t 
mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_u64m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64_v_u64m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64_v_u64m2x3_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c @@ -1,208 +1,329 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8mf8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf8x3(int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8mf8x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8mf4(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf4x3(int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8mf4x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8mf2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf2x3(int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8mf2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m1x3(int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, 
vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m2x3(int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8mf8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf8x3(uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8mf8x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8mf4(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf4x3(uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8mf4x3(base, bstride, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8mf2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf2x3(uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8mf2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8m1(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m1x3(uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8m1x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2x3 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8m2(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m2x3(uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8m2x3(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8mf8x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8mf4x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m1x3_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_i8m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m2x3_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_i8m2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[V0]], [[V1]], 
[[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8mf8x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8mf4x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8mf2x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8m1x3_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8_v_u8m2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8_v_u8m2x3_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c @@ -7,243 +7,435 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16mf4(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf4x4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16mf4x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf2x4(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m1x4(_Float16 *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) { + return 
__riscv_vssseg4e16_v_f16m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m2x4(_Float16 *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, 
vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16mf4(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf4x4(int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16mf4x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf2x4(int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vssseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m1x4(int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m2x4(int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16mf4(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf4x4(uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16mf4x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf2x4(uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m1x4(uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m2x4(uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg4.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16mf4x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16mf2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, 
vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_f16m2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16mf4x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16mf2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m1x4_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_i16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m2x4_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_i16m2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16mf4x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16mf2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return 
__riscv_vssseg4e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16_v_u16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16_v_u16m2x4_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c @@ -7,183 +7,327 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_f32mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32mf2x4(float *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_f32mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vssseg4e32_v_f32m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m1x4(float *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_f32m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg4.nxv4f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_f32m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m2x4(float *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_f32m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_i32mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32mf2x4(int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_i32mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32_v_i32m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m1x4(int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_i32m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_i32m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m2x4(int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_i32m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_u32mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32mf2x4(uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_u32mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32_v_u32m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m1x4(uint32_t *base, 
ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_u32m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_u32m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m2x4(uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_u32m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32mf2x4_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_f32mf2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vssseg4e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m1x4_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_f32m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_f32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m2x4_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_f32m2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_i32mf2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m1x4_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_i32m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_i32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m2x4_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_i32m2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_u32mf2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, 
vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_u32m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32_v_u32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32_v_u32m2x4_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c @@ -7,123 +7,219 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
<vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f64.i64(<vscale x 1 x double> [[TMP4]], <vscale x 1 x double> [[TMP5]], <vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vssseg4e64_v_f64m1(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e64_v_f64m1x4(double *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vssseg4e64_v_f64m1x4(base, bstride, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], <vscale x 2 x double> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f64.i64(<vscale x 2 x double> [[V0]], <vscale x 2 x double> [[V1]], <vscale x 2 x double> [[V2]], <vscale x 2 x double> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], <vscale x 2 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], <vscale x 2 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f64.i64(<vscale x 2 x double> [[TMP4]], <vscale x 2 x double> [[TMP5]], <vscale x 2 x double> [[TMP6]], <vscale x 2 x double> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vssseg4e64_v_f64m2(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e64_v_f64m2x4(double *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vssseg4e64_v_f64m2x4(base, bstride, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]]
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64_v_i64m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m1x4(int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_i64m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64_v_i64m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m2x4(int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_i64m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg4e64_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64_v_u64m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_u64m1x4(uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_u64m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64_v_u64m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_u64m2x4(uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_u64m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg4e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vssseg4e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_f64m1x4_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_f64m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vssseg4e64_v_f64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_f64m2x4_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_f64m2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m1x4_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_i64m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64_v_i64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m2x4_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_i64m2x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64_v_u64m1x4_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[V0]], 
<vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], <vscale x 2 x i64> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], <vscale x 2 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vssseg4e64_v_u64m2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vssseg4e64_v_u64m2x4_m(mask, base, bstride, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c
@@ -1,208 +1,369 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] =
extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vssseg4e8_v_i8mf8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf8x4(int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8_v_i8mf8x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vssseg4e8_v_i8mf4(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf4x4(int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8_v_i8mf4x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vssseg4e8_v_i8mf2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf2x4(int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8_v_i8mf2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vssseg4e8_v_i8m1(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8m1x4(int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8_v_i8m1x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue 
{ , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vssseg4e8_v_i8m2(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8m2x4(int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8_v_i8m2x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vssseg4e8_v_u8mf8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf8x4(uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8_v_u8mf8x4(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf4(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8mf4x4(uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf4x4(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf2(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8mf2x4(uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf2x4(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv8i8.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m1(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8m1x4(uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m1x4(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv16i8.i64(<vscale x 16 x i8> [[V0]], <vscale x 16 x i8> [[V1]], <vscale x 16 x i8> [[V2]], <vscale x 16 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m2(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8m2x4(uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m2x4(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf8x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf4x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf2x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_i8m1x4_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_i8m1x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2x4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0]], <vscale x 16 x i8> [[V1]], <vscale x 16 x i8> [[V2]], <vscale x 16 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_i8m2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_i8m2x4_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_i8m2x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf8x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf4x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf2x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m1x4_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2x4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0]], <vscale x 16 x i8> [[V1]], <vscale x 16 x i8> [[V2]], <vscale x 16 x i8> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) {
  return __riscv_vssseg4e8_v_u8m2x4_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c
@@ -7,183 +7,363 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { 
, , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf4x5(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_f16mf4x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf2x5(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_f16mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vssseg5e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16m1x5(_Float16 *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_f16m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t 
v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf4x5(int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_i16mf4x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf2x5(int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_i16mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16m1x5(int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_i16m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf4x5(uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_u16mf4x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2x5 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf2x5(uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_u16mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) 
// CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16m1x5(uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_u16m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_f16mf4x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_f16mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vssseg5e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void 
test_vssseg5e16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_f16m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_i16mf4x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_i16mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16m1x5_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_i16m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_u16mf4x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_u16mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16_v_u16m1x5_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c
@@ -7,123 +7,243 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return __riscv_vssseg5e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e32_v_f32mf2x5(float *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vssseg5e32_v_f32mf2x5(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , }
[[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vssseg5e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32m1x5(float *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_f32m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32mf2x5(int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_i32mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32m1x5(int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_i32m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t 
vl) { - return __riscv_vssseg5e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32mf2x5(uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_u32mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32m1x5(uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_u32m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 
3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32mf2x5_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_f32mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vssseg5e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32m1x5_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_f32m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_i32mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32m1x5_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_i32m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32_v_u32mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vssseg5e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vssseg5e32_v_u32m1x5_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c
@@ -7,63 +7,123 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+//
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vssseg5e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e64_v_f64m1x5(double *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e64_v_f64m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vssseg5e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e64_v_i64m1x5(int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e64_v_i64m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vssseg5e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e64_v_u64m1x5(uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e64_v_u64m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, 
vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vssseg5e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e64_v_f64m1x5_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e64_v_f64m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vssseg5e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e64_v_i64m1x5_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e64_v_i64m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vssseg5e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vssseg5e64_v_u64m1x5_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c
@@ -1,168 +1,329 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , }
[[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf8x5(int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8mf8x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf4x5(int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8mf4x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg5.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf2x5(int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8m1x5(int8_t *base, ptrdiff_t bstride, vint8m1x5_t 
v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf8x5(uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8mf8x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } 
[[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf4x5(uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8mf4x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf2x5(uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8mf2x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8m1x5(uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8m1x5(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return 
__riscv_vssseg5e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8mf8x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8mf4x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8m1x5_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_i8m1x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8mf8x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8mf4x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8mf2x5_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8_v_u8m1x5_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c @@ -7,183 +7,399 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] =
insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf4x6(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_f16mf4x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf2x6(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) { + return 
__riscv_vssseg6e16_v_f16mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vssseg6e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16m1x6(_Float16 *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_f16m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf4x6(int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_i16mf4x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return 
__riscv_vssseg6e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf2x6(int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_i16mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16m1x6(int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_i16m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf4x6(uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_u16mf4x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf2x6(uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_u16mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16m1x6(uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_u16m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg6.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_f16mf4x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_f16mf2x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vssseg6e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_f16m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_i16mf4x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_i16mf2x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, 
vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16m1x6_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_i16m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16_v_u16mf4x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], <vscale x 2 x i16> [[V3]], <vscale x 2 x i16> [[V4]], <vscale x 2 x i16> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], <vscale x 2 x i16> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vssseg6e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e16_v_u16mf2x6_m(mask, base, bstride, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1x6_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], <vscale x 4 x i16> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vssseg6e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e16_v_u16m1x6_m(mask, base, bstride, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c
@@ -7,123 +7,267 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f32.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], <vscale x 1 x float> [[V2]], <vscale x 1 x float> [[V3]], <vscale x 1 x float> [[V4]], <vscale x 1 x float> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f32.i64(<vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], <vscale x 1 x float> [[TMP8]], <vscale x 1 x float> [[TMP9]], <vscale x 1 x float> [[TMP10]], <vscale x 1 x float> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return
__riscv_vssseg6e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32mf2x6(float *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_f32mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vssseg6e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32m1x6(float *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_f32m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32mf2x6(int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_i32mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32m1x6(int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_i32m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_u32mf2x6(uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_u32mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_u32m1x6(uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_u32m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 
5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32mf2x6_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_f32mf2x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vssseg6e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32m1x6_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_f32m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vssseg6e32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_i32mf2x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32m1x6_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32_v_i32m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, 
vl);
+void test_vssseg6e32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e32_v_u32mf2x6_m(mask, base, bstride, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return __riscv_vssseg6e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e32_v_u32m1x6_m(mask, base, bstride, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c
@@ -7,63 +7,135 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64
noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vssseg6e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e64_v_f64m1x6(double *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e64_v_f64m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vssseg6e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e64_v_i64m1x6(int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e64_v_i64m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vssseg6e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e64_v_u64m1x6(uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e64_v_u64m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vssseg6e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e64_v_f64m1x6_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e64_v_f64m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vssseg6e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e64_v_i64m1x6_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e64_v_i64m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { 
-  return __riscv_vssseg6e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64_v_u64m1x6_m(mask, base, bstride, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c
@@ -1,168 +1,361 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e8_v_i8mf8x6(int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) {
+  return
__riscv_vssseg6e8_v_i8mf8x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf4x6(int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8mf4x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf2x6(int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void 
test_vssseg6e8_v_i8m1x6(int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf8x6(uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8mf8x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf4x6(uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8mf4x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, 
vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf2x6(uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8mf2x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8m1x6(uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8m1x6(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue 
{ , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8mf8x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8mf4x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8mf2x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8m1x6_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_i8m1x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , 
, , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8mf8x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) { + return 
__riscv_vssseg6e8_v_u8mf4x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8mf2x6_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , 
, , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8_v_u8m1x6_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c @@ -7,183 +7,435 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf4x7(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_f16mf4x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf2x7(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_f16mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vssseg7e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16m1x7(_Float16 *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_f16m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf4x7(int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_i16mf4x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } 
[[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf2x7(int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_i16mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vssseg7e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16m1x7(int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_i16m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf4x7(uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_u16mf4x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf2x7(uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_u16mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vssseg7e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16m1x7(uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_u16m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , 
, , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_f16mf4x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) { + return 
__riscv_vssseg7e16_v_f16mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vssseg7e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_f16m1x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_i16mf4x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , 
, , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_i16mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vssseg7e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16m1x7_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_i16m1x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_u16mf4x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg7e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16_v_u16mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[V0]], [[V1]], 
[[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return __riscv_vssseg7e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vssseg7e16_v_u16m1x7_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c
@@ -7,123 +7,291 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , ,
} [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32mf2x7(float *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_f32mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] 
= extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vssseg7e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32m1x7(float *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_f32m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32mf2x7(int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_i32mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32m1x7(int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_i32m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32mf2x7(uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_u32mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , 
, , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32m1x7(uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_u32m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vssseg7e32_v_f32mf2x7_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_f32mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vssseg7e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32m1x7_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_f32m1x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_i32mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32m1x7_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_i32m1x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 
5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32_v_u32mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { + 
return __riscv_vssseg7e32_v_u32m1x7_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c
@@ -7,63 +7,147 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return __riscv_vssseg7e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e64_v_f64m1x7(double *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vssseg7e64_v_f64m1x7(base, bstride, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vssseg7e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_i64m1x7(int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64_v_i64m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vssseg7e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e64_v_u64m1x7(uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e64_v_u64m1x7(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], <vscale x 1 x double> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1f64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], <vscale x 1 x double> [[V5]], <vscale x 1 x double> [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], <vscale x 1 x double> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], <vscale x 1 x double> [[TMP12]], <vscale x 1 x double> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return __riscv_vssseg7e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e64_v_f64m1x7_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e64_v_f64m1x7_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vssseg7e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e64_v_i64m1x7_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e64_v_i64m1x7_m(mask, base, bstride, v_tuple, vl);
 }
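For context, here is a minimal caller-side sketch of the tuple-based API these tests migrate to. It is an illustrative helper, not part of the patch, and it assumes the matching tuple-returning strided segment load (__riscv_vlsseg7e64_v_u64m1x7) from the same tuple-type migration is also available in riscv_vector.h:

#include <riscv_vector.h>

// Copy seven interleaved u64 fields between two strided buffers:
// load the segments into one vuint64m1x7_t tuple value, then store
// the whole tuple back out with the renamed tuple-typed intrinsic.
void copy_seg7(uint64_t *dst, const uint64_t *src, ptrdiff_t bstride, size_t vl) {
  vuint64m1x7_t v_tuple = __riscv_vlsseg7e64_v_u64m1x7(src, bstride, vl);
  __riscv_vssseg7e64_v_u64m1x7(dst, bstride, v_tuple, vl);
}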
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vssseg7e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e64_v_u64m1x7_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c
@@ -1,168 +1,393 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], <vscale x 1 x i8> [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return __riscv_vssseg7e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e8_v_i8mf8x7(int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e8_v_i8mf8x7(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void 
@llvm.riscv.vssseg7.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf4x7(int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8mf4x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf2x7(int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return 
__riscv_vssseg7e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8m1x7(int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf8x7(uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_u8mf8x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf4x7(uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_u8mf4x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf2x7(uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_u8mf2x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t 
v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vssseg7e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8m1x7(uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_u8m1x7(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8mf8x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8mf4x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , 
, , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8mf2x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vssseg7e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8m1x7_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_i8m1x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { + return 
__riscv_vssseg7e8_v_u8mf8x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8_v_u8mf4x7_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], <vscale x 4 x i8> [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return __riscv_vssseg7e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e8_v_u8mf2x7_m(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1x7_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], <vscale x 8 x i8> [[V4]], <vscale x 8 x i8> [[V5]], <vscale x 8 x i8> [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return __riscv_vssseg7e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e8_v_u8m1x7_m(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c
@@ -7,183 +7,471 @@

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.nxv1f16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], <vscale x 1 x half> [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], <vscale x 1 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT:
[[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf4x8(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_f16mf4x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf2x8(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_f16mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vssseg8e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vssseg8e16_v_f16m1x8(_Float16 *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_f16m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf4x8(int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_i16mf4x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf2x8(int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_i16mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16m1x8(int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_i16m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], 
[[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf4x8(uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_u16mf4x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf2x8(uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_u16mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, 
vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16m1x8(uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_u16m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_f16mf4x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_f16mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vssseg8e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_f16m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_i16mf4x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_i16mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16m1x8_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_i16m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16_v_u16mf4x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, 
vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vssseg8e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vssseg8e16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vssseg8e16_v_u16mf2x8_m(mask, base, bstride, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vssseg8e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vssseg8e16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vssseg8e16_v_u16m1x8_m(mask, base, bstride, v_tuple, vl);
}
diff --git
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c
@@ -7,123 +7,315 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return __riscv_vssseg8e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vssseg8e32_v_f32mf2x8(float *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vssseg8e32_v_f32mf2x8(base, bstride, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64
noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vssseg8e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_f32m1x8(float *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_f32m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32mf2x8(int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_i32mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32m1x8(int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_i32m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32mf2x8(uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_u32mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32m1x8(uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_u32m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vssseg8e32_v_f32mf2x8_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_f32mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vssseg8e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_f32m1x8_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_f32m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_i32mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32m1x8_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_i32m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , 
, , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_u32mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32_v_u32m1x8_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c @@ -7,63 +7,159 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } 
[[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vssseg8e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_f64m1x8(double *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64_v_f64m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_i64m1x8(int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64_v_i64m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64_v_u64m1(base, bstride, 
v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_u64m1x8(uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64_v_u64m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vssseg8e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_f64m1x8_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64_v_f64m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_i64m1x8_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64_v_i64m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64_v_u64m1x8_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c @@ -1,168 +1,425 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf8x8(int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8mf8x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 
[[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf4x8(int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8mf4x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf2x8(int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8m1x8(int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, 
vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf8x8(uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8mf8x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf4x8(uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8mf4x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
[[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf2x8(uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8mf2x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], 
[[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8m1x8(uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8m1x8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8mf8x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8mf4x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8_v_i8m1_m(mask, 
base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8m1x8_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_i8m1x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8mf8x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8mf4x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8mf2x8_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue 
{ , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8_v_u8m1x8_m(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e16.c @@ -7,303 +7,423 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16(base, 
bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf4x2(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf2x2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m1x2(_Float16 *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m2x2(_Float16 *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m4x2(_Float16 *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf4x2(int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) { + return 
__riscv_vssseg2e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_i16mf2x2(int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_i16m1x2(int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> }
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m2x2(int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m4x2(int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf4x2(uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf2x2(uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m1x2(uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_u16m2x2(uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64(<vscale x 16 x i16> [[V0]], <vscale x 16 x i16> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return __riscv_vssseg2e16(base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_u16m4x2(uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x half> [[V0:%.*]],
[[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 
0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return 
__riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m1x2_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m2x2_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m2x2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_i16m4x2_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) { 
+  return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m2x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e16_v_u16m4x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[V0]], <vscale x 16 x i16> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return __riscv_vssseg2e16(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32.c
@@ -7,243 +7,339 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2
-// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32mf2x2(float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m1x2(float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m2x2(float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m4x2(float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vssseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v0, v1, vl); +void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t 
bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vssseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t 
bstride, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl); +void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0]], <vscale x 4 x i32> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[V0]], <vscale x 8 x i32> [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vssseg2e32(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32_tuple.c
deleted file mode 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32_tuple.c
+++ /dev/null
@@ -1,345 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] 
{ -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32mf2x2(float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32m1x2(float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32m2x2(float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32m4x2(float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg2e32_v_f32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) 
-// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e64.c @@ -7,183 +7,255 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_f64m1x2(double *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg2.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_f64m2x2(double *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_f64m4x2(double *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_i64m1x2(int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_i64m2x2(int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_i64m4x2(int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, 
vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m1x2(uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m2x2(uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m4x2(uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_f64m1x2_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_f64m2x2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_f64m4x2_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_i64m1x2_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_i64m2x2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_i64m4x2_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e64_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v0, v1, vl); +void test_vssseg2e64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e8.c @@ -1,248 +1,345 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_i8mf8x2(int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_i8mf4x2(int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return 
__riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_i8mf2x2(int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_i8m1x2(int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_i8m2x2(int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_i8m4x2(int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_u8mf8x2(uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_u8mf4x2(uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg2e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_u8mf2x2(uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v0, v1, vl); +void test_vssseg2e8_v_u8m1x2(uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m2x2(uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8(base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m4x2(uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m1x2_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m2x2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_i8m4x2_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e8_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vssseg2e8(mask, base, bstride, v0, v1, vl);
+void test_vssseg2e8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) {
  return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl);
 }
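Note on the pattern above: each segment store now takes a single tuple value (e.g. vuint8m2x2_t) in place of NF separate vector operands, and clang rebuilds the llvm.riscv.vssseg* operands from the coerced tuple fields with insertvalue/extractvalue. A minimal caller-side sketch of the new API follows; the tuple-returning __riscv_vlseg2e8_v_u8m1x2 load is assumed from the same tuple-type conversion, and copy_seg2, src, and dst are illustrative names, not part of this patch:

#include <riscv_vector.h>

// Sketch: load one x2 tuple of u8 segments, then store it with a byte
// stride. The overloaded __riscv_vssseg2e8 matches the tests above; the
// vlseg2e8 tuple load is an assumed companion intrinsic.
void copy_seg2(uint8_t *dst, const uint8_t *src, ptrdiff_t bstride, size_t vl) {
  vuint8m1x2_t t = __riscv_vlseg2e8_v_u8m1x2(src, vl);  // one tuple instead of v0, v1
  __riscv_vssseg2e8(dst, bstride, t, vl);               // segment j (2 bytes) lands at dst + j*bstride
}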
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e16.c
@@ -7,243 +7,387 @@
 #include 

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16mf4x3(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16mf2x3(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16m1x3(_Float16 *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16m2x3(_Float16 *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16mf4x3(int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16mf2x3(int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16m1x3(int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16m2x3(int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16mf4x3(uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16mf2x3(uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m1x3(uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m2x3(uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16m1x3_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_i16m2x3_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e16_v_u16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vssseg3e16(mask, base, bstride, v0, v1, v2, vl);
+void test_vssseg3e16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) {
  return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl);
 }
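The vssseg3e32 tests below repeat the same conversion at EEW=32. For the masked overloads checked above, the mask remains the leading argument; a sketch under the same assumption about the tuple-returning load (copy_seg3_masked, src, and dst are illustrative names):

#include <riscv_vector.h>

// Sketch: masked three-field strided store; only lanes set in mask are
// written. __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl) is the
// overloaded form checked above; the vlseg3e16 tuple load is assumed.
void copy_seg3_masked(vbool64_t mask, _Float16 *dst, const _Float16 *src,
                      ptrdiff_t bstride, size_t vl) {
  vfloat16mf4x3_t t = __riscv_vlseg3e16_v_f16mf4x3(src, vl);
  __riscv_vssseg3e16(mask, dst, bstride, t, vl);  // segment j starts at dst + j*bstride bytes
}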
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e32.c
@@ -7,183 +7,291 @@
 #include 

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_f32mf2x3(float *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_f32m1x3(float *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_f32m2x3(float *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_i32mf2x3(int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_i32m1x3(int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl);
+void test_vssseg3e32_v_i32m2x3(int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vssseg3e32(base, bstride, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void
@test_vssseg3e32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32mf2x3(uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32m1x3(uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg3.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32m2x3(uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32mf2x3_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32m1x3_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_f32m2x3_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32m1x3_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_i32m2x3_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e32_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e64.c @@ -7,123 +7,195 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f64.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m1x3(double *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m2x3(double *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_i64m1x3(int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_i64m2x3(int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], 
[[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m1x3(uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m2x3(uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return 
__riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m1x3_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_f64m2x3_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl); +void 
test_vssseg3e64_v_i64m1x3_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_i64m2x3_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, 
vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg3e8.c @@ -1,208 +1,329 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf8x3(int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf4x3(int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf2x3(int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m1x3(int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void 
test_vssseg3e8_v_i8m2x3(int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf8x3(uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf4x3(uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf2x3(uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m1x3(uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m2x3(uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m1x3_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_i8m2x3_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg3e8_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e16.c @@ -7,243 +7,435 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf4x4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf2x4(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m1x4(_Float16 *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m2x4(_Float16 *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf4x4(int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf2x4(int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m1x4(int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg4e16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m2x4(int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void 
test_vssseg4e16_v_u16mf4x4(uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf2x4(uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m1x4(uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m2x4(uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m1x4_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_i16m2x4_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 
[[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], <vscale x 8 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], <vscale x 8 x i16> [[TMP6]], <vscale x 8 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return __riscv_vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e32.c
@@ -7,183 +7,327 @@ #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f32.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], <vscale x 1 x float> [[V2]], <vscale x 1 x float> [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f32.i64(<vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], <vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl);
+void test_vssseg4e32_v_f32mf2x4(float *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) {
+  return
__riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m1x4(float *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, 
vfloat32m2_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m2x4(float *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32mf2x4(int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], 
[[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m1x4(int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m2x4(int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32mf2x4(uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m1x4(uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m2x4(uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32mf2x4_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m1x4_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_f32m2x4_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vssseg4e32_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m1x4_m(vbool32_t 
mask, int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_i32m2x4_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e64.c @@ -7,123 +7,219 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_f64m1x4(double *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_f64m2x4(double *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m1x4(int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg4e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m2x4(int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl); +void 
test_vssseg4e64_v_u64m1x4(uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_u64m2x4(uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_f64m1x4_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_f64m2x4_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m1x4_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_i64m2x4_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e64_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); }
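A minimal usage sketch of the tuple-based API exercised by the masked vssseg4e64 tests above. It assumes the matching strided segment load was migrated to the same x4 tuple naming (__riscv_vlsseg4e64_v_u64m1x4); the helper function and its name are illustrative only, not part of this patch:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller: the four separate vuint64m1_t operands of the old
// store become a single vuint64m1x4_t. The load name below is assumed to
// follow the same tuple-suffix convention as the stores tested here.
void copy_u64_segments_m(vbool64_t mask, uint64_t *dst, const uint64_t *src,
                         ptrdiff_t bstride, size_t vl) {
  vuint64m1x4_t v_tuple = __riscv_vlsseg4e64_v_u64m1x4(src, bstride, vl);
  // Masked, non-overloaded form; previously this took v0..v3 separately.
  __riscv_vssseg4e64_v_u64m1x4_m(mask, dst, bstride, v_tuple, vl);
}

diff --git 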
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg4e8.c @@ -1,208 +1,369 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf8x4(int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf4x4(int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf2x4(int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8m1x4(int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8m2x4(int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf8x4(uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf4x4(uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf2x4(uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8m1x4(uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8m2x4(uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg4e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, 
ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8m1x4_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_i8m2x4_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , 
, , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg4e8_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); +void test_vssseg4e8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); }
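As a usage sketch for the overloaded vssseg4e8 tests above: one vint8m1x4_t replaces the four vector operands, and the same __riscv_vssseg4e8 name now resolves on the tuple type. The round-trip below assumes the strided segment load returns the corresponding tuple (__riscv_vlsseg4e8_v_i8m1x4); the helper is illustrative, not part of this diff:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical round-trip: load four interleaved i8 fields as one tuple,
// then store them back through the overloaded tuple-based intrinsic.
void copy_i8_segments(int8_t *dst, const int8_t *src, ptrdiff_t bstride,
                      size_t vl) {
  vint8m1x4_t v_tuple = __riscv_vlsseg4e8_v_i8m1x4(src, bstride, vl);
  // Old form: __riscv_vssseg4e8(dst, bstride, v0, v1, v2, v3, vl);
  __riscv_vssseg4e8(dst, bstride, v_tuple, vl);
}

diff --git 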
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e16.c @@ -7,183 +7,363 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf4x5(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf2x5(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16m1x5(_Float16 *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf4x5(int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf2x5(int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16m1x5(int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf4x5(uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf2x5(uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16m1x5(uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg5.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, 
vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_i16m1x5_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg5e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e32.c @@ -7,123 +7,243 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32mf2x5(float *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32m1x5(float *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32mf2x5(int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32m1x5(int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , 
, } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32mf2x5(uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32m1x5(uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg5e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32mf2x5_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_f32m1x5_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_i32m1x5_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e32_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e64.c @@ -7,63 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1f64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1f64.i64(<vscale x 1 x double> [[TMP5]], <vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return __riscv_vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e64_v_f64m1x5(double *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vssseg5e64(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1i64.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return __riscv_vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e64_v_i64m1x5(int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vssseg5e64(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1i64.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return __riscv_vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e64_v_u64m1x5(uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vssseg5e64(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_f64m1x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv1f64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP5]], <vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return __riscv_vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e64_v_f64m1x5_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vssseg5e64(mask, base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_i64m1x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return __riscv_vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+void test_vssseg5e64_v_i64m1x5_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vssseg5e64(mask, base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e64(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg5e8.c @@ -1,168 +1,329 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf8x5(int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); 
+void test_vssseg5e8_v_i8mf4x5(int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf2x5(int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8m1x5(int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf8x5(uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vssseg5.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf4x5(uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf2x5(uint8_t *base, ptrdiff_t bstride, 
vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8m1x5(uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_i8m1x5_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) { + return 
__riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg5e8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); +void test_vssseg5e8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e16.c @@ -7,183 +7,399 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf4x6(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf2x6(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16m1x6(_Float16 *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf4x6(int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf2x6(int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16m1x6(int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf4x6(uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf2x6(uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16m1x6(uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void 
// -void test_vssseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_i16m1x6_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , 
, , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e32.c @@ -7,123 +7,267 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return 
__riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32mf2x6(float *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32m1x6(float *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , 
} [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32mf2x6(int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, 
vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32m1x6(int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_u32mf2x6(uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { 
, , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_u32m1x6(uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], 
ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32mf2x6_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_f32m1x6_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_i32m1x6_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg6e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c @@ -7,63 +7,135 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg6.nxv1f64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], <vscale x 1 x double> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_f64m1x6(double *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_i64m1x6(int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_u64m1x6(uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], <vscale x 1 x double> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_f64m1x6_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_i64m1x6_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e8.c
@@ -1,168 +1,361 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+void test_vssseg6e8_v_i8mf8x6(int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vssseg6e8(base, bstride, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef
[[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf4x6(int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf2x6(int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8m1x6(int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf8x6(uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf4x6(uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf2x6(uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vssseg6e8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8m1x6(uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , 
} [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_i8m1x6_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, 
vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg6.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e8_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +void test_vssseg6e8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e16.c @@ -7,183 +7,435 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, 
vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf4x7(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf2x7(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16m1x7(_Float16 *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , 
, , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf4x7(int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf2x7(int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16m1x7(int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf4x7(uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf2x7(uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16m1x7(uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, 
vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_i16m1x7_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vssseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return __riscv_vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vssseg7e16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e32.c
@@ -7,123 +7,291 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void
test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32mf2x7(float *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32m1x7(float *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32mf2x7(int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32m1x7(int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], 
[[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32mf2x7(uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32m1x7(uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg7e32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32mf2x7_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_f32m1x7_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vssseg7e32_v_i32m1x7_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e32_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e64.c @@ -7,63 +7,147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 
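// The seven V_TUPLE_COERCE arguments above are the ABI coercion of a single
// vfloat64m1x7_t: codegen reassembles the seven-element aggregate with
// insertvalue and immediately splits it again with extractvalue to feed
// llvm.riscv.vssseg7. A minimal round-trip sketch of the tuple API follows;
// the tuple-based strided segment load (__riscv_vlsseg7e64_v_f64m1x7) is
// assumed from the same intrinsics series and is not part of this patch, and
// the wrapper name is illustrative only.
static inline void roundtrip_seg7_f64(double *dst, const double *src,
                                      ptrdiff_t bstride, size_t vl) {
  // Gather seven interleaved fields into one tuple value...
  vfloat64m1x7_t t = __riscv_vlsseg7e64_v_f64m1x7(src, bstride, vl);
  // ...and store them back with the overloaded form exercised by these tests.
  __riscv_vssseg7e64(dst, bstride, t, vl);
}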
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_f64m1x7(double *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , 
, , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_i64m1x7(int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_u64m1x7(uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64(base, bstride, v_tuple, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_f64m1x7_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg7.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_i64m1x7_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e64_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue 
{ , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e64(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg7e8.c @@ -1,168 +1,393 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf8x7(int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf4x7(int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf2x7(int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8m1x7(int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 
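// The mask type in the masked tests further below is keyed to the SEW/LMUL
// ratio: u8mf8 has SEW=8 and LMUL=1/8, giving ratio 64, hence vbool64_t. A
// sketch of the overloaded masked call, mirroring
// test_vssseg7e8_v_u8mf8x7_m; the wrapper name is illustrative only.
static inline void store_seg7_u8mf8_masked(vbool64_t mask, uint8_t *base,
                                           ptrdiff_t bstride,
                                           vuint8mf8x7_t v_tuple, size_t vl) {
  // Elements whose mask bit is clear leave memory untouched.
  __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl);
}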
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf8x7(uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf4x7(uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf2x7(uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8m1x7(uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_i8m1x7_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 
1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t 
v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg7e8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vssseg7e8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e16.c @@ -7,183 +7,471 @@ #include -// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg8e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf4x8(_Float16 *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf2x8(_Float16 *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16m1x8(_Float16 *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , 
, } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf4x8(int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf2x8(int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16m1x8(int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, 
size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf4x8(uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf2x8(uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16m1x8(uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } 
[[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, 
base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_i16m1x8_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e32.c @@ -7,123 +7,315 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_f32mf2x8(float *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], 
i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_f32m1x8(float *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32mf2x8(int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32m1x8(int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32mf2x8(uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32m1x8(uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_f32mf2x8_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_f32m1x8_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t 
v7, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_i32m1x8_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e64.c @@ -7,63 +7,159 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vssseg8.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_f64m1x8(double *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue 
{ , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_i64m1x8(int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_u64m1x8(uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], 
[[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_f64m1x8_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_i64m1x8_m(vbool64_t mask, int64_t *base, ptrdiff_t 
bstride, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e64_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e64(mask, base, bstride, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg8e8.c @@ -1,168 +1,425 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf8x8(int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf4x8(int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf2x8(int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , 
, , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8m1x8(int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf8x8(uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], 
[[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf4x8(uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf2x8(uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8m1x8(uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], 
[[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_i8m1x8_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, 
vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg8e8_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vssseg8e8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); }
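
A minimal usage sketch of the migrated API (not part of the patch): after this change, each strided segment store takes a single tuple value such as vint8m1x8_t in place of NF separate vector operands. The sketch below assumes the matching tuple-based strided segment load, __riscv_vlsseg8e8_v_i8m1x8, from the same intrinsics revision; only <riscv_vector.h> is needed, as in the tests above.

#include <riscv_vector.h>

// Round-trip one 8-field strided segment through the new tuple type.
void copy_seg8(int8_t *dst, const int8_t *src, ptrdiff_t bstride, size_t vl) {
  // Load eight interleaved fields into a single vint8m1x8_t tuple
  // (assumed tuple-based load from the same intrinsics revision).
  vint8m1x8_t v_tuple = __riscv_vlsseg8e8_v_i8m1x8(src, bstride, vl);
  // Store the tuple with the intrinsic this patch migrates; the
  // pre-patch form took eight separate vint8m1_t operands v0..v7.
  __riscv_vssseg8e8_v_i8m1x8(dst, bstride, v_tuple, vl);
}

The masked overloaded form exercised by the tests follows the same shape: __riscv_vssseg8e8(mask, dst, bstride, v_tuple, vl).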